#define VMALLOC_VMADDR(x) ((unsigned long)(x))
#endif
-#define BLKIF_HASHSZ 1024
-#define BLKIF_HASH(_d) (((int)(_d))&(BLKIF_HASHSZ-1))
-
static kmem_cache_t *blkif_cachep;
-static blkif_t *blkif_hash[BLKIF_HASHSZ];
-blkif_t *blkif_find(domid_t domid)
+blkif_t *alloc_blkif(domid_t domid)
{
- blkif_t *blkif = blkif_hash[BLKIF_HASH(domid)];
-
- while (blkif) {
- if (blkif->domid == domid) {
- blkif_get(blkif);
- return blkif;
- }
- blkif = blkif->hash_next;
- }
+ blkif_t *blkif;
blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL);
if (!blkif)
memset(blkif, 0, sizeof(*blkif));
blkif->domid = domid;
blkif->status = DISCONNECTED;
- spin_lock_init(&blkif->vbd_lock);
spin_lock_init(&blkif->blk_ring_lock);
atomic_set(&blkif->refcnt, 1);
- blkif->hash_next = blkif_hash[BLKIF_HASH(domid)];
- blkif_hash[BLKIF_HASH(domid)] = blkif;
return blkif;
}
op.flags = GNTMAP_host_map;
op.ref = shared_page;
op.dom = blkif->domid;
-
+
BUG_ON( HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1) );
if (op.handle < 0) {
void free_blkif(blkif_t *blkif)
{
- blkif_t **pblkif;
evtchn_op_t op = { .cmd = EVTCHNOP_close };
op.u.close.port = blkif->evtchn;
vfree(blkif->blk_ring.sring);
}
- pblkif = &blkif_hash[BLKIF_HASH(blkif->domid)];
- while ( *pblkif != blkif )
- {
- BUG_ON(!*pblkif);
- pblkif = &(*pblkif)->hash_next;
- }
- *pblkif = blkif->hash_next;
- destroy_all_vbds(blkif);
kmem_cache_free(blkif_cachep, blkif);
}
{
blkif_cachep = kmem_cache_create("blkif_cache", sizeof(blkif_t),
0, 0, NULL, NULL);
- memset(blkif_hash, 0, sizeof(blkif_hash));
}
*
* Routines for managing virtual block devices (VBDs).
*
- * NOTE: vbd_lock protects updates to the rb_tree against concurrent lookups
- * in vbd_translate. All other lookups are implicitly protected because the
- * only caller (the control message dispatch routine) serializes the calls.
- *
* Copyright (c) 2003-2005, Keir Fraser & Steve Hand
*/
#include "common.h"
#include <asm-xen/xenbus.h>
-struct vbd {
- blkif_vdev_t handle; /* what the domain refers to this vbd as */
+struct vbd {
+ blkif_vdev_t handle; /* what the domain refers to this vbd as */
unsigned char readonly; /* Non-zero -> read-only */
unsigned char type; /* VDISK_xxx */
blkif_pdev_t pdevice; /* phys device that this vbd maps to */
struct block_device *bdev;
int active;
- rb_node_t rb; /* for linking into R-B tree lookup struct */
};
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
/* Translate a blkif physical-device cookie into the Linux dev_t it names. */
static inline dev_t vbd_map_devnum(blkif_pdev_t cookie)
{
	return MKDEV(BLKIF_MAJOR(cookie), BLKIF_MINOR(cookie));
}
#define vbd_sz(_v) ((_v)->bdev->bd_part ? \
(_v)->bdev->bd_part->nr_sects : (_v)->bdev->bd_disk->capacity)
#define bdev_put(_b) blkdev_put(_b)
struct vbd *vbd_create(blkif_t *blkif, blkif_vdev_t handle,
blkif_pdev_t pdevice, int readonly)
{
- struct vbd *vbd;
+ struct vbd *vbd, *err;
if ( unlikely((vbd = kmalloc(sizeof(struct vbd), GFP_KERNEL)) == NULL) )
{
return ERR_PTR(-ENOMEM);
}
+ blkif->vbd = vbd;
vbd->handle = handle;
vbd->readonly = readonly;
vbd->type = 0;
vbd->pdevice = pdevice;
- /* FIXME: Who frees vbd on failure? --RR */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
vbd->bdev = open_by_devnum(
vbd_map_devnum(vbd->pdevice),
if ( IS_ERR(vbd->bdev) )
{
DPRINTK("vbd_creat: device %08x doesn't exist.\n", vbd->pdevice);
- return ERR_PTR(-ENOENT);
+ err = ERR_PTR(-ENOENT);
+ goto out;
}
if ( (vbd->bdev->bd_disk == NULL) )
{
DPRINTK("vbd_creat: device %08x doesn't exist.\n", vbd->pdevice);
bdev_put(vbd->bdev);
- return ERR_PTR(-ENOENT);
+ err = ERR_PTR(-ENOENT);
+ goto out;
}
if ( vbd->bdev->bd_disk->flags & GENHD_FL_CD )
vbd->type |= VDISK_CDROM;
if ( vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE )
vbd->type |= VDISK_REMOVABLE;
-
#else
if ( (blk_size[MAJOR(vbd->pdevice)] == NULL) || (vbd_sz(vbd) == 0) )
{
DPRINTK("vbd_creat: device %08x doesn't exist.\n", vbd->pdevice);
- return ERR_PTR(-ENOENT);
+ err = ERR_PTR(-ENOENT);
+ goto out;
}
#endif
DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
handle, blkif->domid);
return vbd;
+
+ out:
+ kfree(vbd);
+ return err;
}
/*
 * vbd_activate - mark @vbd live on @blkif.
 *
 * Takes a reference on @blkif for the lifetime of the active vbd; the
 * matching blkif_put() happens in vbd_free().  Activating a vbd twice
 * is a bug.
 */
void vbd_activate(blkif_t *blkif, struct vbd *vbd)
{
	BUG_ON(vbd_is_active(vbd));

	/* Now we're active. */
	vbd->active = 1;
	blkif_get(blkif);
}
/*
 * vbd_free - tear down @vbd and release its resources.
 *
 * Drops the blkif reference taken by vbd_activate() (if the vbd was
 * active), closes the underlying host block device, and frees the vbd.
 */
void vbd_free(blkif_t *blkif, struct vbd *vbd)
{
	if (vbd_is_active(vbd)) {
		blkif_put(blkif);
	}

	/*
	 * BUG FIX: vbd_create() stores the vbd in blkif->vbd, and
	 * vbd_translate() dereferences it.  Clear the back-pointer before
	 * freeing so a late request cannot hit freed memory.
	 */
	if (blkif->vbd == vbd)
		blkif->vbd = NULL;

	bdev_put(vbd->bdev);
	kfree(vbd);
}
-void destroy_all_vbds(blkif_t *blkif)
-{
- struct vbd *vbd;
- rb_node_t *rb;
-
- spin_lock(&blkif->vbd_lock);
-
- while ( (rb = blkif->vbd_rb.rb_node) != NULL )
- {
- vbd = rb_entry(rb, struct vbd, rb);
- rb_erase(rb, &blkif->vbd_rb);
- spin_unlock(&blkif->vbd_lock);
- bdev_put(vbd->bdev);
- kfree(vbd);
- spin_lock(&blkif->vbd_lock);
- blkif_put(blkif);
- }
-
- spin_unlock(&blkif->vbd_lock);
-}
-
/*
 * vbd_translate - map a guest request onto the physical device.
 *
 * On success fills req->dev with the physical device and returns 0.
 * Returns -ENODEV if no vbd exists, -EACCES if the request writes to a
 * read-only vbd or runs past the end of the device.
 */
int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation)
{
	struct vbd *vbd = blkif->vbd;
	int rc = -EACCES;

	/*
	 * BUG FIX: the old rb-tree lookup returned -ENODEV for a missing
	 * VBD; dereferencing blkif->vbd unconditionally would oops if the
	 * frontend issues a request before vbd_create() has run.
	 */
	if (unlikely(vbd == NULL)) {
		DPRINTK("vbd_translate; domain %u attempted to access "
			"non-existent VBD.\n", blkif->domid);
		return -ENODEV;
	}

	if ((operation == WRITE) && vbd->readonly)
		goto out;

	if (unlikely((req->sector_number + req->nr_sects) > vbd_sz(vbd)))
		goto out;

	req->dev = vbd->pdevice;
	rc = 0;

 out:
	return rc;
}